# the Boston data from the MASS package
# access the MASS package
library(MASS)
# load the data
data("Boston")
# explore the dataset
str(Boston)
## 'data.frame': 506 obs. of 14 variables:
## $ crim : num 0.00632 0.02731 0.02729 0.03237 0.06905 ...
## $ zn : num 18 0 0 0 0 0 12.5 12.5 12.5 12.5 ...
## $ indus : num 2.31 7.07 7.07 2.18 2.18 2.18 7.87 7.87 7.87 7.87 ...
## $ chas : int 0 0 0 0 0 0 0 0 0 0 ...
## $ nox : num 0.538 0.469 0.469 0.458 0.458 0.458 0.524 0.524 0.524 0.524 ...
## $ rm : num 6.58 6.42 7.18 7 7.15 ...
## $ age : num 65.2 78.9 61.1 45.8 54.2 58.7 66.6 96.1 100 85.9 ...
## $ dis : num 4.09 4.97 4.97 6.06 6.06 ...
## $ rad : int 1 2 2 3 3 3 5 5 5 5 ...
## $ tax : num 296 242 242 222 222 222 311 311 311 311 ...
## $ ptratio: num 15.3 17.8 17.8 18.7 18.7 18.7 15.2 15.2 15.2 15.2 ...
## $ black : num 397 397 393 395 397 ...
## $ lstat : num 4.98 9.14 4.03 2.94 5.33 ...
## $ medv : num 24 21.6 34.7 33.4 36.2 28.7 22.9 27.1 16.5 18.9 ...
The dataset contains housing values in suburbs of Boston. The data frame has the following columns:

- crim: per capita crime rate by town.
- zn: proportion of residential land zoned for lots over 25,000 sq.ft.
- indus: proportion of non-retail business acres per town.
- chas: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise).
- nox: nitrogen oxides concentration (parts per 10 million).
- rm: average number of rooms per dwelling.
- age: proportion of owner-occupied units built prior to 1940.
- dis: weighted mean of distances to five Boston employment centres.
- rad: index of accessibility to radial highways.
- tax: full-value property-tax rate per $10,000.
- ptratio: pupil-teacher ratio by town.
- black: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town.
- lstat: lower status of the population (percent).
- medv: median value of owner-occupied homes in $1000s.
chas and rad are of type integer; the rest of the variables are numeric.
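If you prefer to check the types programmatically instead of reading them off str(), one way is:

# class of each column in the data frame
sapply(Boston, class)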
summary(Boston)
## crim zn indus chas
## Min. : 0.00632 Min. : 0.00 Min. : 0.46 Min. :0.00000
## 1st Qu.: 0.08205 1st Qu.: 0.00 1st Qu.: 5.19 1st Qu.:0.00000
## Median : 0.25651 Median : 0.00 Median : 9.69 Median :0.00000
## Mean : 3.61352 Mean : 11.36 Mean :11.14 Mean :0.06917
## 3rd Qu.: 3.67708 3rd Qu.: 12.50 3rd Qu.:18.10 3rd Qu.:0.00000
## Max. :88.97620 Max. :100.00 Max. :27.74 Max. :1.00000
## nox rm age dis
## Min. :0.3850 Min. :3.561 Min. : 2.90 Min. : 1.130
## 1st Qu.:0.4490 1st Qu.:5.886 1st Qu.: 45.02 1st Qu.: 2.100
## Median :0.5380 Median :6.208 Median : 77.50 Median : 3.207
## Mean :0.5547 Mean :6.285 Mean : 68.57 Mean : 3.795
## 3rd Qu.:0.6240 3rd Qu.:6.623 3rd Qu.: 94.08 3rd Qu.: 5.188
## Max. :0.8710 Max. :8.780 Max. :100.00 Max. :12.127
## rad tax ptratio black
## Min. : 1.000 Min. :187.0 Min. :12.60 Min. : 0.32
## 1st Qu.: 4.000 1st Qu.:279.0 1st Qu.:17.40 1st Qu.:375.38
## Median : 5.000 Median :330.0 Median :19.05 Median :391.44
## Mean : 9.549 Mean :408.2 Mean :18.46 Mean :356.67
## 3rd Qu.:24.000 3rd Qu.:666.0 3rd Qu.:20.20 3rd Qu.:396.23
## Max. :24.000 Max. :711.0 Max. :22.00 Max. :396.90
## lstat medv
## Min. : 1.73 Min. : 5.00
## 1st Qu.: 6.95 1st Qu.:17.02
## Median :11.36 Median :21.20
## Mean :12.65 Mean :22.53
## 3rd Qu.:16.95 3rd Qu.:25.00
## Max. :37.97 Max. :50.00
summary() shows the minimum, maximum, mean, and the first, second (median), and third quartiles of each variable in the dataset.
dim(Boston)
## [1] 506 14
The dataset has 506 rows and 14 columns.
# plot matrix of the variables (Boston[-1] drops the first column, crim)
pairs(Boston[-1])
The pairs nox and dis, rm and lstat, rm and medv, and lstat and medv show some kind of linear pattern.
library(corrplot)
## corrplot 0.84 loaded
library(tidyverse)
## -- Attaching packages --------------------------------------- tidyverse 1.3.0 --
## v ggplot2 3.3.2 v purrr 0.3.4
## v tibble 3.0.4 v dplyr 1.0.2
## v tidyr 1.1.2 v stringr 1.4.0
## v readr 1.4.0 v forcats 0.5.0
## -- Conflicts ------------------------------------------ tidyverse_conflicts() --
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
## x dplyr::select() masks MASS::select()
# calculate the correlation matrix and round it
cor_matrix <- cor(Boston) %>% round(digits = 2)
# visualize the correlation matrix
corrplot(cor_matrix, method = "circle")
Strong correlations stand out in the plot:

- crim with rad and tax;
- zn with dis;
- indus with nox, age, rad, tax, lstat and dis;
- nox with indus, age, rad, tax, lstat and dis;
- rm with medv;
- age with indus, nox, dis and lstat;
- dis with zn, indus, nox and age;
- rad with crim, indus, nox and especially tax;
- tax with crim, indus, nox, lstat and especially rad;
- lstat with indus, nox, rm, age and medv;
- medv with rm and lstat.
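Reading these off the circle plot is error-prone, so here is a small sketch that lists every pair with an absolute correlation above 0.7 (the threshold is an arbitrary choice of mine, not part of the original analysis):

# find variable pairs with |correlation| > 0.7 (upper triangle only, no duplicates)
strong <- which(abs(cor_matrix) > 0.7 & upper.tri(cor_matrix), arr.ind = TRUE)
data.frame(var1 = rownames(cor_matrix)[strong[, 1]],
           var2 = colnames(cor_matrix)[strong[, 2]],
           cor  = cor_matrix[strong])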
library(GGally)
## Registered S3 method overwritten by 'GGally':
## method from
## +.gg ggplot2
library(ggplot2)
p <- ggpairs(Boston, mapping = aes(), lower = list(combo = wrap("facethist", bins = 20)))
p
Only rm looks like it is almost normally distributed. The variables are also on very different scales, so the data needs to be standardized.
# center and standardize variables
boston_scaled <- scale(Boston)
# summaries of the scaled variables
summary(boston_scaled)
## crim zn indus chas
## Min. :-0.419367 Min. :-0.48724 Min. :-1.5563 Min. :-0.2723
## 1st Qu.:-0.410563 1st Qu.:-0.48724 1st Qu.:-0.8668 1st Qu.:-0.2723
## Median :-0.390280 Median :-0.48724 Median :-0.2109 Median :-0.2723
## Mean : 0.000000 Mean : 0.00000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.007389 3rd Qu.: 0.04872 3rd Qu.: 1.0150 3rd Qu.:-0.2723
## Max. : 9.924110 Max. : 3.80047 Max. : 2.4202 Max. : 3.6648
## nox rm age dis
## Min. :-1.4644 Min. :-3.8764 Min. :-2.3331 Min. :-1.2658
## 1st Qu.:-0.9121 1st Qu.:-0.5681 1st Qu.:-0.8366 1st Qu.:-0.8049
## Median :-0.1441 Median :-0.1084 Median : 0.3171 Median :-0.2790
## Mean : 0.0000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.5981 3rd Qu.: 0.4823 3rd Qu.: 0.9059 3rd Qu.: 0.6617
## Max. : 2.7296 Max. : 3.5515 Max. : 1.1164 Max. : 3.9566
## rad tax ptratio black
## Min. :-0.9819 Min. :-1.3127 Min. :-2.7047 Min. :-3.9033
## 1st Qu.:-0.6373 1st Qu.:-0.7668 1st Qu.:-0.4876 1st Qu.: 0.2049
## Median :-0.5225 Median :-0.4642 Median : 0.2746 Median : 0.3808
## Mean : 0.0000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 1.6596 3rd Qu.: 1.5294 3rd Qu.: 0.8058 3rd Qu.: 0.4332
## Max. : 1.6596 Max. : 1.7964 Max. : 1.6372 Max. : 0.4406
## lstat medv
## Min. :-1.5296 Min. :-1.9063
## 1st Qu.:-0.7986 1st Qu.:-0.5989
## Median :-0.1811 Median :-0.1449
## Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.6024 3rd Qu.: 0.2683
## Max. : 3.5453 Max. : 2.9865
After scaling, every variable has mean 0 and standard deviation 1, so the scale (min and max) has changed for all the variables.
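scale() standardizes each column by subtracting its mean and dividing by its standard deviation, which is easy to verify by hand for a single value:

# manual standardization of the first crim value ...
(Boston$crim[1] - mean(Boston$crim)) / sd(Boston$crim)
# ... matches the first value in the scaled matrix
boston_scaled[1, "crim"]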
# change the object to data frame so that it will be easier to use the data
boston_scaled <- as.data.frame(boston_scaled)
class(boston_scaled)
## [1] "data.frame"
Our next job is to create a categorical variable of the crime rate in the Boston dataset (from the scaled crime rate) using quantiles as the break points.
# summary of the scaled crime rate
summary(boston_scaled$crim)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.419367 -0.410563 -0.390280 0.000000 0.007389 9.924110
The min value is -0.42 and the max value is 9.92. The first quartile is -0.41, the second (median) is -0.39, and the third is 0.007.
# create a quantile vector of crim
bins <- quantile(boston_scaled$crim)
bins
## 0% 25% 50% 75% 100%
## -0.419366929 -0.410563278 -0.390280295 0.007389247 9.924109610
These quantiles will be the break points (limits) for the categories.
# create a categorical variable 'crime'
crime <- cut(boston_scaled$crim, breaks = bins, include.lowest = TRUE)
# look at the table of the new factor crime
table(crime)
## crime
## [-0.419,-0.411] (-0.411,-0.39] (-0.39,0.00739] (0.00739,9.92]
## 127 126 126 127
127 values have been assigned to the first and last categories, and 126 to the second and third. Values between -0.419 and -0.411 fall in category one, values between -0.411 and -0.39 in category two, values between -0.39 and 0.00739 in category three, and values between 0.00739 and 9.92 in category four. Let's label the categories low, med_low, med_high, and high.
crime <- cut(boston_scaled$crim, breaks = bins, labels=c("low", "med_low", "med_high", "high"), include.lowest = TRUE)
table(crime)
## crime
## low med_low med_high high
## 127 126 126 127
Now the categories have names. Next we can remove the original variable (crim) from the scaled dataset.
boston_scaled <- dplyr::select(boston_scaled, -crim)
colnames(boston_scaled)
## [1] "zn" "indus" "chas" "nox" "rm" "age" "dis"
## [8] "rad" "tax" "ptratio" "black" "lstat" "medv"
And then we can add the new categorized variable (crime) to the dataset.
boston_scaled <- data.frame(boston_scaled, crime)
summary(boston_scaled)
## zn indus chas nox
## Min. :-0.48724 Min. :-1.5563 Min. :-0.2723 Min. :-1.4644
## 1st Qu.:-0.48724 1st Qu.:-0.8668 1st Qu.:-0.2723 1st Qu.:-0.9121
## Median :-0.48724 Median :-0.2109 Median :-0.2723 Median :-0.1441
## Mean : 0.00000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.04872 3rd Qu.: 1.0150 3rd Qu.:-0.2723 3rd Qu.: 0.5981
## Max. : 3.80047 Max. : 2.4202 Max. : 3.6648 Max. : 2.7296
## rm age dis rad
## Min. :-3.8764 Min. :-2.3331 Min. :-1.2658 Min. :-0.9819
## 1st Qu.:-0.5681 1st Qu.:-0.8366 1st Qu.:-0.8049 1st Qu.:-0.6373
## Median :-0.1084 Median : 0.3171 Median :-0.2790 Median :-0.5225
## Mean : 0.0000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 0.4823 3rd Qu.: 0.9059 3rd Qu.: 0.6617 3rd Qu.: 1.6596
## Max. : 3.5515 Max. : 1.1164 Max. : 3.9566 Max. : 1.6596
## tax ptratio black lstat
## Min. :-1.3127 Min. :-2.7047 Min. :-3.9033 Min. :-1.5296
## 1st Qu.:-0.7668 1st Qu.:-0.4876 1st Qu.: 0.2049 1st Qu.:-0.7986
## Median :-0.4642 Median : 0.2746 Median : 0.3808 Median :-0.1811
## Mean : 0.0000 Mean : 0.0000 Mean : 0.0000 Mean : 0.0000
## 3rd Qu.: 1.5294 3rd Qu.: 0.8058 3rd Qu.: 0.4332 3rd Qu.: 0.6024
## Max. : 1.7964 Max. : 1.6372 Max. : 0.4406 Max. : 3.5453
## medv crime
## Min. :-1.9063 low :127
## 1st Qu.:-0.5989 med_low :126
## Median :-0.1449 med_high:126
## Mean : 0.0000 high :127
## 3rd Qu.: 0.2683
## Max. : 2.9865
Now the data is ready and we can start working with it. First we divide the data into training (80%) and testing (20%) sets.
# number of rows in the Boston dataset
n <- nrow(boston_scaled)
# choose randomly 80% of the rows
ind <- sample(n, size = n * 0.8)
# create train set from that 80%
train <- boston_scaled[ind,]
# create test set from the remaining data
test <- boston_scaled[-ind,]
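Note that sample() picks the rows at random, so the exact split, and every result that follows, varies between runs. A minimal sketch of a reproducible version, assuming an arbitrary seed value:

# seed the RNG before sampling so the split is reproducible (seed is arbitrary)
set.seed(2020)
ind <- sample(nrow(boston_scaled), size = floor(nrow(boston_scaled) * 0.8))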
The train dataset has 404 rows and 14 columns; the test dataset has 102 rows and 14 columns. Let's train a linear discriminant analysis (LDA) classification model with crime as the target variable.
lda.fit <- lda(crime ~ . , data = train)
lda.fit
## Call:
## lda(crime ~ ., data = train)
##
## Prior probabilities of groups:
## low med_low med_high high
## 0.2574257 0.2376238 0.2549505 0.2500000
##
## Group means:
## zn indus chas nox rm age
## low 0.88317906 -0.8902253 -0.12090214 -0.8615174 0.3830462 -0.8684904
## med_low -0.07454775 -0.3285648 -0.02626030 -0.5853717 -0.1168088 -0.4223049
## med_high -0.38566716 0.1834826 0.18636222 0.3769801 0.1534299 0.3890154
## high -0.48724019 1.0171306 -0.07742312 1.0749471 -0.4203953 0.7939526
## dis rad tax ptratio black lstat
## low 0.8445345 -0.6815029 -0.7385764 -0.4062791 0.37486003 -0.744110145
## med_low 0.3956054 -0.5356439 -0.4665618 -0.1040790 0.35217080 -0.156699496
## med_high -0.3525250 -0.3875674 -0.2953702 -0.2386659 0.09531207 0.002194999
## high -0.8350021 1.6379981 1.5139626 0.7806252 -0.87697636 0.868385488
## medv
## low 0.4965904
## med_low 0.0248613
## med_high 0.2209654
## high -0.6923331
##
## Coefficients of linear discriminants:
## LD1 LD2 LD3
## zn 0.09939220 0.668799449 -1.04791124
## indus 0.01998890 -0.292442101 0.22484268
## chas -0.07675563 -0.045672386 0.09644320
## nox 0.37926099 -0.786311424 -1.35357468
## rm -0.08494673 -0.194723685 -0.12750701
## age 0.30504980 -0.275130522 -0.22613612
## dis -0.02700884 -0.276488511 0.06724811
## rad 2.96994857 1.048700513 -0.21425709
## tax 0.03814018 -0.104942790 0.81746662
## ptratio 0.10320928 -0.028673741 -0.44256186
## black -0.14321087 0.002600549 0.17479817
## lstat 0.22791600 -0.252764719 0.41672233
## medv 0.18806243 -0.421087555 -0.21917243
##
## Proportion of trace:
## LD1 LD2 LD3
## 0.9459 0.0402 0.0139
LD1 captures about 94.6% of the between-group separation (proportion of trace 0.9459), whereas the other LDs contribute little. Let's define a helper that draws the discriminant coefficients as arrows, create a numeric vector of the train set's crime classes, and draw a biplot.
# helper: draw the discriminant coefficients as arrows on an LDA biplot
lda.arrows <- function(x, myscale = 1, arrow_heads = 0.1, color = "red",
                       tex = 0.75, choices = c(1, 2)) {
  heads <- coef(x)
  # arrows from the origin to the (scaled) coefficients
  arrows(x0 = 0, y0 = 0,
         x1 = myscale * heads[, choices[1]],
         y1 = myscale * heads[, choices[2]], col = color, length = arrow_heads)
  # label each arrow with its variable name
  text(myscale * heads[, choices], labels = row.names(heads),
       cex = tex, col = color, pos = 3)
}
classes <- as.numeric(train$crime)
plot(lda.fit, dimen = 2, col = classes, pch = classes)
The colour and plotting symbol indicate the crime category. Let's add the arrows we defined earlier.
plot(lda.fit, dimen = 2, col = classes, pch = classes)
lda.arrows(lda.fit, myscale = 5)
Next we take the crime classes from the test set and save them as correct_classes (so that we can compare against them later), and remove the crime variable from the test dataset so that we can predict it with the fitted model.
correct_classes <- test$crime
class(correct_classes)
## [1] "factor"
test <- dplyr::select(test, -crime)
colnames(test)
## [1] "zn" "indus" "chas" "nox" "rm" "age" "dis"
## [8] "rad" "tax" "ptratio" "black" "lstat" "medv"
The crime variable is no longer in the test dataset. Let's predict with the model on the test dataset and then compare the predictions to correct_classes.
lda.pred <- predict(lda.fit, newdata = test)
table(correct = correct_classes, predicted = lda.pred$class)
## predicted
## correct low med_low med_high high
## low 17 6 0 0
## med_low 3 20 7 0
## med_high 2 6 15 0
## high 0 0 0 26
For the high category the model made perfect predictions, 26/26. For med_high 15/23, for med_low 20/30, and for low 17/23 observations were predicted correctly.
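The per-class rates and the overall accuracy (here 78/102, about 76%) can be computed straight from the cross-tabulation:

# store the confusion table and derive accuracies from it
tab <- table(correct = correct_classes, predicted = lda.pred$class)
diag(tab) / rowSums(tab)   # fraction of each class predicted correctly
sum(diag(tab)) / sum(tab)  # overall accuracy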
Clustering
# load the Boston dataset, scale it and create the euclidean distance matrix
library(MASS)
data('Boston')
boston_scaled <- scale(Boston)
boston_scaled <- as.data.frame(boston_scaled)
dist_eu <- dist(boston_scaled, method = "euclidean")
summary(dist_eu)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.1343 3.4625 4.8241 4.9111 6.1863 14.3970
Euclidean distance is the ordinary straight-line distance between two vectors.
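As a sanity check, the first entry of the distance matrix can be reproduced by hand from the definition:

# euclidean distance between the first two (scaled) observations, by hand
x <- unlist(boston_scaled[1, ])
y <- unlist(boston_scaled[2, ])
sqrt(sum((x - y)^2))
# the same entry from the dist object
as.matrix(dist_eu)[1, 2]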
Let’s calculate the manhattan distance.
dist_man <- dist(boston_scaled, method = "manhattan")
summary(dist_man)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.2662 8.4832 12.6090 13.5488 17.7568 48.8618
Manhattan distance is the sum of the absolute differences between the components of two vectors.
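The same check works here:

# manhattan distance between the first two observations, by hand
sum(abs(unlist(boston_scaled[1, ]) - unlist(boston_scaled[2, ])))
as.matrix(dist_man)[1, 2]  # should match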
K-means clustering
km <- kmeans(boston_scaled, centers = 4)
pairs(boston_scaled, col = km$cluster)
Above we can see K-means clustering using 4 clusters, each identified by a different color.
What is the best k, the number of clusters? One way to determine k is to look at how the total within-cluster sum of squares (WCSS) behaves when the number of clusters changes. When you plot the number of clusters against the total WCSS, the optimal number of clusters is where the total WCSS drops radically. Note that k-means randomly assigns the initial cluster centers and therefore might produce different results every time.
set.seed(900)
k_max <- 10
twcss <- sapply(1:k_max, function(k){kmeans(boston_scaled, k)$tot.withinss})
qplot(x = 1:k_max, y = twcss, geom = 'line')
It looks like 2 is the optimal number of clusters, since the curve drops dramatically at k = 2.
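Because of that random initialization, a common refinement (not part of the original code here) is to let kmeans() run several random starts and keep the best solution, via its nstart argument:

# run 25 random initializations and keep the one with the lowest total WCSS
km <- kmeans(boston_scaled, centers = 2, nstart = 25)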
Let’s create k-means using 2 as number of clusters.
km <- kmeans(boston_scaled, centers = 2)
pairs(boston_scaled, col = km$cluster)
rm and lstat, and rm and medv, are the only pairs still showing a linear pattern; medv and lstat, and dis and nox, have a curved, non-linear pattern.
Bonus.
library(MASS)
data('Boston')
boston_scaled <- scale(Boston)
boston_scaled <- as.data.frame(boston_scaled)
boston_scaled <- dplyr::select(boston_scaled, -crim)
n <- nrow(boston_scaled)
ind <- sample(n, size = n * 0.8)
ktrain <- boston_scaled[ind,]
ktest <- boston_scaled[-ind,]
km <- kmeans(ktrain, centers = 4)
lda.fit <- lda(km$cluster ~ . , data = ktrain)
lda.fit
## Call:
## lda(km$cluster ~ ., data = ktrain)
##
## Prior probabilities of groups:
## 1 2 3 4
## 0.1064356 0.3143564 0.4133663 0.1658416
##
## Group means:
## zn indus chas nox rm age
## 1 -0.02556311 -0.4214017 1.65044081 -0.06341642 1.3347678 0.2238756
## 2 -0.48724019 1.1535174 -0.08632433 1.13408537 -0.4174781 0.8283821
## 3 -0.35206167 -0.4075331 -0.27232907 -0.42141667 -0.2310348 -0.1412526
## 4 1.77276888 -1.0794322 -0.27232907 -1.12647984 0.5809091 -1.4036878
## dis rad tax ptratio black lstat medv
## 1 -0.3483776 -0.3942834 -0.6093748 -1.02573014 0.2939814 -0.7238248 1.3805896
## 2 -0.8624951 1.1061684 1.2066260 0.60355843 -0.5855684 0.8635993 -0.7237526
## 3 0.1691226 -0.6043213 -0.6192280 0.05262372 0.3101287 -0.1548870 -0.1081300
## 4 1.4940692 -0.6064768 -0.5669409 -0.61647652 0.3518842 -0.8690652 0.6220355
##
## Coefficients of linear discriminants:
## LD1 LD2 LD3
## zn 0.003479948 -1.311689426 -0.761115369
## indus 0.936737602 -0.407503993 -0.181794321
## chas -0.167644631 0.631026345 -0.770943356
## nox 0.896989707 -0.452138083 -0.272352528
## rm -0.034025553 0.165801674 -0.615581321
## age -0.044459412 0.599126833 0.012642565
## dis -0.088521463 -0.629471813 0.005214464
## rad 0.642699177 0.117578513 -0.364357886
## tax 0.422662032 -0.667438098 -0.131882972
## ptratio 0.265080739 -0.157872219 0.136575290
## black -0.056390985 -0.002398193 0.054300281
## lstat 0.311829110 0.026941745 -0.480960215
## medv 0.064842317 0.292044772 -0.831575220
##
## Proportion of trace:
## LD1 LD2 LD3
## 0.6545 0.2024 0.1431
The parts of this output are:

- Prior probabilities of groups: the proportion of training observations in each group; for example, about 41% of the observations belong to group 3.
- Group means: the group centres of gravity, i.e. the mean of each variable within each group.
- Coefficients of linear discriminants: the linear combination of predictor variables that forms the LDA decision rule. For example, LD1 = 0.003*zn + 0.94*indus - 0.17*chas + 0.90*nox - 0.03*rm - 0.04*age - 0.09*dis + 0.64*rad + 0.42*tax + 0.27*ptratio - 0.06*black + 0.31*lstat + 0.06*medv.
- Proportion of trace: the percentage of separation achieved by each discriminant function: LD1 0.6545, LD2 0.2024, LD3 0.1431, and 0.6545 + 0.2024 + 0.1431 = 1.
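All of these components can be read directly off the fitted lda object; the proportion of trace, for instance, comes from the squared singular values:

lda.fit$prior                       # prior probabilities of groups
lda.fit$means                       # group means
lda.fit$scaling                     # coefficients of linear discriminants
lda.fit$svd^2 / sum(lda.fit$svd^2)  # proportion of trace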
# redefine the arrow helper so this bonus section also runs standalone
lda.arrows <- function(x, myscale = 1, arrow_heads = 0.1, color = "red",
                       tex = 0.75, choices = c(1, 2)) {
  heads <- coef(x)
  arrows(x0 = 0, y0 = 0,
         x1 = myscale * heads[, choices[1]],
         y1 = myscale * heads[, choices[2]], col = color, length = arrow_heads)
  text(myscale * heads[, choices], labels = row.names(heads),
       cex = tex, col = color, pos = 3)
}
# colour by the k-means cluster of each training observation
# (the target here is km$cluster, not the crime classes of the earlier split)
classes <- km$cluster
plot(lda.fit, dimen = 2, col = classes, pch = classes)
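The lda.arrows() helper defined above can be overlaid on this biplot too; a sketch, with the arrow scale chosen by eye:

plot(lda.fit, dimen = 2, col = classes, pch = classes)
lda.arrows(lda.fit, myscale = 3)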
Super-Bonus
model_predictors <- dplyr::select(train, -crime)
# check the dimensions
dim(model_predictors)
## [1] 404 13
dim(lda.fit$scaling)
## [1] 13 3
# matrix multiplication
matrix_product <- as.matrix(model_predictors) %*% lda.fit$scaling
matrix_product <- as.data.frame(matrix_product)
library(plotly)
##
## Attaching package: 'plotly'
## The following object is masked from 'package:ggplot2':
##
## last_plot
## The following object is masked from 'package:MASS':
##
## select
## The following object is masked from 'package:stats':
##
## filter
## The following object is masked from 'package:graphics':
##
## layout
# 3D plot coloured by crime class (train)
plot_ly(x = matrix_product$LD1, y = matrix_product$LD2, z = matrix_product$LD3, type= 'scatter3d', mode='markers', color= train$crime)
## Warning: `arrange_()` is deprecated as of dplyr 0.7.0.
## Please use `arrange()` instead.
## See vignette('programming') for more help
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_warnings()` to see where this warning was generated.
# 3D plot by k means cluster
plot_ly(x = matrix_product$LD1, y = matrix_product$LD2, z = matrix_product$LD3, type= 'scatter3d', mode='markers', color= km$cluster)
The colourings of the two plots are very different but the shape is the same, because the data points are the same. The first plot colours the points by their crime category; the second colours them by the k-means cluster they were assigned to.
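One caveat: km was fitted on ktrain, which comes from a different random 80% sample than train, so its cluster labels do not correspond row by row to the points plotted above. A sketch of a matched comparison, clustering exactly the rows that were projected (km_matched is a name introduced here, and the seed is arbitrary):

# cluster the same rows that were projected to LD1-LD3, so both 3D plots
# colour the same observations
set.seed(900)
km_matched <- kmeans(model_predictors, centers = 4)
plot_ly(x = matrix_product$LD1, y = matrix_product$LD2, z = matrix_product$LD3,
        type = 'scatter3d', mode = 'markers', color = factor(km_matched$cluster))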